struct rcu_head rcu;
};
+static DEFINE_RCU_READ_LOCK(msixtbl_rcu_lock);
+
static struct msixtbl_entry *msixtbl_find_entry(
struct vcpu *v, unsigned long addr)
{
void *virt;
int r = X86EMUL_UNHANDLEABLE;
- rcu_read_lock();
+ rcu_read_lock(&msixtbl_rcu_lock);
if ( len != 4 )
goto out;
r = X86EMUL_OKAY;
out:
- rcu_read_unlock();
+ rcu_read_unlock(&msixtbl_rcu_lock);
return r;
}
int nr_entry;
int r = X86EMUL_UNHANDLEABLE;
- rcu_read_lock();
+ rcu_read_lock(&msixtbl_rcu_lock);
if ( len != 4 )
goto out;
r = X86EMUL_OKAY;
out:
- rcu_read_unlock();
+ rcu_read_unlock(&msixtbl_rcu_lock);
return r;
}
struct msixtbl_entry *entry;
void *virt;
- rcu_read_lock();
+ rcu_read_lock(&msixtbl_rcu_lock);
entry = msixtbl_find_entry(v, addr);
virt = msixtbl_addr_to_virt(entry, addr);
- rcu_read_unlock();
+ rcu_read_unlock(&msixtbl_rcu_lock);
return !!virt;
}
*
* It is illegal to block while in an RCU read-side critical section.
*/
-#define rcu_read_lock(x) do { } while (0)
+#define rcu_read_lock(x) ((void)(x))
/**
* rcu_read_unlock - marks the end of an RCU read-side critical section.
*
* See rcu_read_lock() for more information.
*/
-#define rcu_read_unlock(x) do { } while (0)
+#define rcu_read_unlock(x) ((void)(x))
/*
* So where is rcu_write_lock()? It does not exist, as there is no
unsigned long symtab_len;
};
+/* Updates of domain_list and domain_hash require domlist_update_lock;
+ * reads require holding domlist_read_lock (RCU) or domlist_update_lock. */
+extern spinlock_t domlist_update_lock;
+extern rcu_read_lock_t domlist_read_lock;
+
extern struct vcpu *idle_vcpu[NR_CPUS];
#define IDLE_DOMAIN_ID (0x7FFFU)
#define is_idle_domain(d) ((d)->domain_id == IDLE_DOMAIN_ID)
local_events_need_delivery() \
))
-/* Protect updates/reads (resp.) of domain_list and domain_hash. */
-extern spinlock_t domlist_update_lock;
-extern rcu_read_lock_t domlist_read_lock;
-
extern struct domain *domain_list;
/* Caller must hold the domlist_read_lock or domlist_update_lock. */
static struct avc_cache avc_cache;
static struct avc_callback_node *avc_callbacks;
+static DEFINE_RCU_READ_LOCK(avc_rcu_lock);
+
static inline int avc_hash(u32 ssid, u32 tsid, u16 tclass)
{
return (ssid ^ (tsid<<2) ^ (tclass<<4)) & (AVC_CACHE_SLOTS - 1);
struct avc_node *node;
struct hlist_head *head;
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
slots_used = 0;
max_chain_len = 0;
}
}
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
return snprintf(buf, size, "entries: %d\nbuckets used: %d/%d\n"
"longest chain: %d\n",
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(&avc_cache.slots_lock[hvalue], flags);
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
hlist_for_each_entry(node, next, head, list)
{
avc_node_delete(node);
ecx++;
if ( ecx >= AVC_CACHE_RECLAIM )
{
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
spin_unlock_irqrestore(lock, flags);
goto out;
}
}
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
spin_unlock_irqrestore(lock, flags);
}
out:
lock = &avc_cache.slots_lock[i];
spin_lock_irqsave(lock, flag);
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
hlist_for_each_entry(node, next, head, list)
avc_node_delete(node);
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
spin_unlock_irqrestore(lock, flag);
}
BUG_ON(!requested);
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
node = avc_lookup(ssid, tsid, tclass);
if ( !node )
{
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
if ( in_avd )
avd = in_avd;
rc = security_compute_av(ssid,tsid,tclass,requested,avd);
if ( rc )
goto out;
- rcu_read_lock();
+ rcu_read_lock(&avc_rcu_lock);
node = avc_insert(ssid,tsid,tclass,avd);
} else {
if ( in_avd )
rc = -EACCES;
}
- rcu_read_unlock();
+ rcu_read_unlock(&avc_rcu_lock);
out:
return rc;
}